From d24d3ad46184ee860cb7b2952e9955c7f3284eff Mon Sep 17 00:00:00 2001 From: "kaf24@firebug.cl.cam.ac.uk" Date: Sat, 5 Nov 2005 11:30:01 +0100 Subject: [PATCH] This patch is intended to make qemu support ia64/vti. We have validated it against latest xen-unstable.hg, both ia32 and x86-64 are not affected. Signed-off-by: Ke Yu Signed-off-by: Kevin Tian Signed-off-by: Nakajima Jun Signed-off-by: Anthony Xu --- tools/ioemu/cpu-all.h | 41 ++++ tools/ioemu/cpu.h | 4 + tools/ioemu/exec-all.h | 9 + tools/ioemu/exec.c | 19 ++ tools/ioemu/hw/i8259_stub.c | 15 -- tools/ioemu/hw/iommu.c | 4 + tools/ioemu/hw/vga.c | 4 + tools/ioemu/ia64_intrinsic.h | 275 +++++++++++++++++++++++++++ tools/ioemu/target-i386-dm/helper2.c | 8 - tools/ioemu/vl.c | 47 ++++- 10 files changed, 400 insertions(+), 26 deletions(-) create mode 100644 tools/ioemu/ia64_intrinsic.h diff --git a/tools/ioemu/cpu-all.h b/tools/ioemu/cpu-all.h index 7374c2e8a0..75544bee28 100644 --- a/tools/ioemu/cpu-all.h +++ b/tools/ioemu/cpu-all.h @@ -625,6 +625,47 @@ int cpu_inw(CPUState *env, int addr); int cpu_inl(CPUState *env, int addr); #endif +#if defined(__i386__) || defined(__x86_64__) +static __inline__ void atomic_set_bit(long nr, volatile void *addr) +{ + __asm__ __volatile__( + "lock ; bts %1,%0" + :"=m" (*(volatile long *)addr) + :"dIr" (nr)); +} +static __inline__ void atomic_clear_bit(long nr, volatile void *addr) +{ + __asm__ __volatile__( + "lock ; btr %1,%0" + :"=m" (*(volatile long *)addr) + :"dIr" (nr)); +} +#elif defined(__ia64__) +#include "ia64_intrinsic.h" +#define atomic_set_bit(nr, addr) ({ \ + typeof(*addr) bit, old, new; \ + volatile typeof(*addr) *m; \ + \ + m = (volatile typeof(*addr)*)(addr + nr / (8*sizeof(*addr))); \ + bit = 1 << (nr % (8*sizeof(*addr))); \ + do { \ + old = *m; \ + new = old | bit; \ + } while (cmpxchg_acq(m, old, new) != old); \ +}) + +#define atomic_clear_bit(nr, addr) ({ \ + typeof(*addr) bit, old, new; \ + volatile typeof(*addr) *m; \ + \ + m = (volatile typeof(*addr)*)(addr + 
nr / (8*sizeof(*addr))); \ + bit = ~(1 << (nr % (8*sizeof(*addr)))); \ + do { \ + old = *m; \ + new = old & bit; \ + } while (cmpxchg_acq(m, old, new) != old); \ +}) +#endif /* memory API */ extern int phys_ram_size; diff --git a/tools/ioemu/cpu.h b/tools/ioemu/cpu.h index adeb5bdd93..a3e24f2ea9 100644 --- a/tools/ioemu/cpu.h +++ b/tools/ioemu/cpu.h @@ -63,7 +63,11 @@ int cpu_get_pic_interrupt(CPUX86State *s); /* MSDOS compatibility mode FPU exception support */ void cpu_set_ferr(CPUX86State *s); +#if defined(__i386__) || defined(__x86_64__) #define TARGET_PAGE_BITS 12 +#elif defined(__ia64__) +#define TARGET_PAGE_BITS 14 +#endif #include "cpu-all.h" #endif /* CPU_I386_H */ diff --git a/tools/ioemu/exec-all.h b/tools/ioemu/exec-all.h index ac0533982d..0e5ab710d6 100644 --- a/tools/ioemu/exec-all.h +++ b/tools/ioemu/exec-all.h @@ -433,6 +433,15 @@ static inline int testandset (int *p) } #endif +#ifdef __ia64__ +#include "ia64_intrinsic.h" +static inline int testandset (int *p) +{ + uint32_t o = 0, n = 1; + return (int)cmpxchg_acq(p, o, n); +} +#endif + #ifdef __s390__ static inline int testandset (int *p) { diff --git a/tools/ioemu/exec.c b/tools/ioemu/exec.c index 64cc7ef005..de0618b09e 100644 --- a/tools/ioemu/exec.c +++ b/tools/ioemu/exec.c @@ -360,6 +360,22 @@ int iomem_index(target_phys_addr_t addr) return 0; } +#ifdef __ia64__ +/* IA64 has separate I/D caches, with coherence maintained by the DMA controller. + * So to emulate the behavior that the guest OS assumes, we need to flush the + * I/D cache here. 
+ */ +static void sync_icache(unsigned long address, int len) +{ + int l; + for(l = 0; l < (len + 32); l += 32) + __ia64_fc(address + l); + + ia64_sync_i(); + ia64_srlz_i(); +} +#endif + void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, int len, int is_write) { @@ -402,6 +418,9 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, /* RAM case */ ptr = phys_ram_base + addr1; memcpy(ptr, buf, l); +#ifdef __ia64__ + sync_icache((unsigned long)ptr,l); +#endif } } else { if (io_index) { diff --git a/tools/ioemu/hw/i8259_stub.c b/tools/ioemu/hw/i8259_stub.c index d0cfa1e4c6..7dd0062803 100644 --- a/tools/ioemu/hw/i8259_stub.c +++ b/tools/ioemu/hw/i8259_stub.c @@ -27,21 +27,6 @@ #include "cpu.h" #include "cpu-all.h" -static __inline__ void atomic_set_bit(long nr, volatile void *addr) -{ - __asm__ __volatile__( - "lock ; bts %1,%0" - :"=m" (*(volatile long *)addr) - :"dIr" (nr)); -} -static __inline__ void atomic_clear_bit(long nr, volatile void *addr) -{ - __asm__ __volatile__( - "lock ; btr %1,%0" - :"=m" (*(volatile long *)addr) - :"dIr" (nr)); -} - #include extern shared_iopage_t *shared_page; extern CPUState *global_env; diff --git a/tools/ioemu/hw/iommu.c b/tools/ioemu/hw/iommu.c index a9249c4ba7..951a840d85 100644 --- a/tools/ioemu/hw/iommu.c +++ b/tools/ioemu/hw/iommu.c @@ -107,7 +107,11 @@ struct iommu_regs { #define IOPTE_VALID 0x00000002 /* IOPTE is valid */ #define IOPTE_WAZ 0x00000001 /* Write as zeros */ +#if defined(__i386__) || defined(__x86_64__) #define PAGE_SHIFT 12 +#elif defined(__ia64__) +#define PAGE_SHIFT 14 +#endif #define PAGE_SIZE (1 << PAGE_SHIFT) #define PAGE_MASK (PAGE_SIZE - 1) diff --git a/tools/ioemu/hw/vga.c b/tools/ioemu/hw/vga.c index a5084e0e67..a18039ef63 100644 --- a/tools/ioemu/hw/vga.c +++ b/tools/ioemu/hw/vga.c @@ -1879,7 +1879,11 @@ void vga_common_init(VGAState *s, DisplayState *ds, uint8_t *vga_ram_base, /* qemu's vga mem is not detached from phys_ram_base and can cause DM abort * when guest 
write vga mem, so allocate a new one */ +#if defined(__i386__) || defined(__x86_64__) s->vram_ptr = shared_vram; +#else + s->vram_ptr = qemu_malloc(vga_ram_size); +#endif s->vram_offset = vga_ram_offset; s->vram_size = vga_ram_size; diff --git a/tools/ioemu/ia64_intrinsic.h b/tools/ioemu/ia64_intrinsic.h new file mode 100644 index 0000000000..c0d637ff85 --- /dev/null +++ b/tools/ioemu/ia64_intrinsic.h @@ -0,0 +1,275 @@ +#ifndef IA64_INTRINSIC_H +#define IA64_INTRINSIC_H + +/* + * Compiler-dependent Intrinsics + * + * Copyright (C) 2002,2003 Jun Nakajima + * Copyright (C) 2002,2003 Suresh Siddha + * + */ +extern long ia64_cmpxchg_called_with_bad_pointer (void); +extern void ia64_bad_param_for_getreg (void); +#define ia64_cmpxchg(sem,ptr,o,n,s) ({ \ + uint64_t _o, _r; \ + switch(s) { \ + case 1: _o = (uint8_t)(long)(o); break; \ + case 2: _o = (uint16_t)(long)(o); break; \ + case 4: _o = (uint32_t)(long)(o); break; \ + case 8: _o = (uint64_t)(long)(o); break; \ + default: break; \ + } \ + switch(s) { \ + case 1: \ + _r = ia64_cmpxchg1_##sem((uint8_t*)ptr,n,_o); break; \ + case 2: \ + _r = ia64_cmpxchg2_##sem((uint16_t*)ptr,n,_o); break; \ + case 4: \ + _r = ia64_cmpxchg4_##sem((uint32_t*)ptr,n,_o); break; \ + case 8: \ + _r = ia64_cmpxchg8_##sem((uint64_t*)ptr,n,_o); break; \ + default: \ + _r = ia64_cmpxchg_called_with_bad_pointer(); break; \ + } \ + (__typeof__(o)) _r; \ +}) + +#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq,ptr,o,n,sizeof(*ptr)) +#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel,ptr,o,n,sizeof(*ptr)) + +/* + * Register Names for getreg() and setreg(). + * + * The "magic" numbers happen to match the values used by the Intel compiler's + * getreg()/setreg() intrinsics. 
+ */ + +/* Special Registers */ + +#define _IA64_REG_IP 1016 /* getreg only */ +#define _IA64_REG_PSR 1019 +#define _IA64_REG_PSR_L 1019 + +/* General Integer Registers */ + +#define _IA64_REG_GP 1025 /* R1 */ +#define _IA64_REG_R8 1032 /* R8 */ +#define _IA64_REG_R9 1033 /* R9 */ +#define _IA64_REG_SP 1036 /* R12 */ +#define _IA64_REG_TP 1037 /* R13 */ + +/* Application Registers */ + +#define _IA64_REG_AR_KR0 3072 +#define _IA64_REG_AR_KR1 3073 +#define _IA64_REG_AR_KR2 3074 +#define _IA64_REG_AR_KR3 3075 +#define _IA64_REG_AR_KR4 3076 +#define _IA64_REG_AR_KR5 3077 +#define _IA64_REG_AR_KR6 3078 +#define _IA64_REG_AR_KR7 3079 +#define _IA64_REG_AR_RSC 3088 +#define _IA64_REG_AR_BSP 3089 +#define _IA64_REG_AR_BSPSTORE 3090 +#define _IA64_REG_AR_RNAT 3091 +#define _IA64_REG_AR_FCR 3093 +#define _IA64_REG_AR_EFLAG 3096 +#define _IA64_REG_AR_CSD 3097 +#define _IA64_REG_AR_SSD 3098 +#define _IA64_REG_AR_CFLAG 3099 +#define _IA64_REG_AR_FSR 3100 +#define _IA64_REG_AR_FIR 3101 +#define _IA64_REG_AR_FDR 3102 +#define _IA64_REG_AR_CCV 3104 +#define _IA64_REG_AR_UNAT 3108 +#define _IA64_REG_AR_FPSR 3112 +#define _IA64_REG_AR_ITC 3116 +#define _IA64_REG_AR_PFS 3136 +#define _IA64_REG_AR_LC 3137 +#define _IA64_REG_AR_EC 3138 + +/* Control Registers */ + +#define _IA64_REG_CR_DCR 4096 +#define _IA64_REG_CR_ITM 4097 +#define _IA64_REG_CR_IVA 4098 +#define _IA64_REG_CR_PTA 4104 +#define _IA64_REG_CR_IPSR 4112 +#define _IA64_REG_CR_ISR 4113 +#define _IA64_REG_CR_IIP 4115 +#define _IA64_REG_CR_IFA 4116 +#define _IA64_REG_CR_ITIR 4117 +#define _IA64_REG_CR_IIPA 4118 +#define _IA64_REG_CR_IFS 4119 +#define _IA64_REG_CR_IIM 4120 +#define _IA64_REG_CR_IHA 4121 +#define _IA64_REG_CR_LID 4160 +#define _IA64_REG_CR_IVR 4161 /* getreg only */ +#define _IA64_REG_CR_TPR 4162 +#define _IA64_REG_CR_EOI 4163 +#define _IA64_REG_CR_IRR0 4164 /* getreg only */ +#define _IA64_REG_CR_IRR1 4165 /* getreg only */ +#define _IA64_REG_CR_IRR2 4166 /* getreg only */ +#define _IA64_REG_CR_IRR3 4167 /* 
getreg only */ +#define _IA64_REG_CR_ITV 4168 +#define _IA64_REG_CR_PMV 4169 +#define _IA64_REG_CR_CMCV 4170 +#define _IA64_REG_CR_LRR0 4176 +#define _IA64_REG_CR_LRR1 4177 + +/* Indirect Registers for getindreg() and setindreg() */ + +#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */ +#define _IA64_REG_INDR_DBR 9001 +#define _IA64_REG_INDR_IBR 9002 +#define _IA64_REG_INDR_PKR 9003 +#define _IA64_REG_INDR_PMC 9004 +#define _IA64_REG_INDR_PMD 9005 +#define _IA64_REG_INDR_RR 9006 + +#ifdef __INTEL_COMPILER +void __fc(uint64_t *addr); +void __synci(void); +void __isrlz(void); +void __dsrlz(void); +uint64_t __getReg(const int whichReg); +uint64_t _InterlockedCompareExchange8_rel(volatile uint8_t *dest, uint64_t xchg, uint64_t comp); +uint64_t _InterlockedCompareExchange8_acq(volatile uint8_t *dest, uint64_t xchg, uint64_t comp); +uint64_t _InterlockedCompareExchange16_rel(volatile uint16_t *dest, uint64_t xchg, uint64_t comp); +uint64_t _InterlockedCompareExchange16_acq(volatile uint16_t *dest, uint64_t xchg, uint64_t comp); +uint64_t _InterlockedCompareExchange_rel(volatile uint32_t *dest, uint64_t xchg, uint64_t comp); +uint64_t _InterlockedCompareExchange_acq(volatile uint32_t *dest, uint64_t xchg, uint64_t comp); +uint64_t _InterlockedCompareExchange64_rel(volatile uint64_t *dest, uint64_t xchg, uint64_t comp); +uint64_t _InterlockedCompareExchange64_acq(volatile uint64_t *dest, uint64_t xchg, uint64_t comp); + +#define ia64_cmpxchg1_rel _InterlockedCompareExchange8_rel +#define ia64_cmpxchg1_acq _InterlockedCompareExchange8_acq +#define ia64_cmpxchg2_rel _InterlockedCompareExchange16_rel +#define ia64_cmpxchg2_acq _InterlockedCompareExchange16_acq +#define ia64_cmpxchg4_rel _InterlockedCompareExchange_rel +#define ia64_cmpxchg4_acq _InterlockedCompareExchange_acq +#define ia64_cmpxchg8_rel _InterlockedCompareExchange64_rel +#define ia64_cmpxchg8_acq _InterlockedCompareExchange64_acq + +#define ia64_srlz_d __dsrlz +#define ia64_srlz_i __isrlz +#define __ia64_fc 
__fc +#define ia64_sync_i __synci +#define __ia64_getreg __getReg +#else /* __INTEL_COMPILER */ +#define ia64_cmpxchg1_acq(ptr, new, old) \ +({ \ + uint64_t ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg1_rel(ptr, new, old) \ +({ \ + uint64_t ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg2_acq(ptr, new, old) \ +({ \ + uint64_t ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg2_rel(ptr, new, old) \ +({ \ + uint64_t ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + \ + asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg4_acq(ptr, new, old) \ +({ \ + uint64_t ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg4_rel(ptr, new, old) \ +({ \ + uint64_t ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg8_acq(ptr, new, old) \ +({ \ + uint64_t ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define 
ia64_cmpxchg8_rel(ptr, new, old) \ +({ \ + uint64_t ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + \ + asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory") +#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory"); +#define __ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory") +#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory") + +#define __ia64_getreg(regnum) \ +({ \ + uint64_t ia64_intri_res; \ + \ + switch (regnum) { \ + case _IA64_REG_GP: \ + asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_IP: \ + asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_PSR: \ + asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_TP: /* for current() */ \ + ia64_intri_res = ia64_r13; \ + break; \ + case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \ + asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \ + : "i"(regnum - _IA64_REG_AR_KR0)); \ + break; \ + case _IA64_REG_CR_DCR ... 
_IA64_REG_CR_LRR1: \ + asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \ + : "i" (regnum - _IA64_REG_CR_DCR)); \ + break; \ + case _IA64_REG_SP: \ + asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \ + break; \ + default: \ + ia64_bad_param_for_getreg(); \ + break; \ + } \ + ia64_intri_res; \ +}) + +#endif /* __INTEL_COMPILER */ +#endif /* IA64_INTRINSIC_H */ diff --git a/tools/ioemu/target-i386-dm/helper2.c b/tools/ioemu/target-i386-dm/helper2.c index 6a97262715..8c1e085160 100644 --- a/tools/ioemu/target-i386-dm/helper2.c +++ b/tools/ioemu/target-i386-dm/helper2.c @@ -389,14 +389,6 @@ cpu_handle_ioreq(CPUState *env) int xc_handle; -static __inline__ void atomic_set_bit(long nr, volatile void *addr) -{ - __asm__ __volatile__( - "lock ; bts %1,%0" - :"=m" (*(volatile long *)addr) - :"dIr" (nr)); -} - void destroy_vmx_domain(void) { diff --git a/tools/ioemu/vl.c b/tools/ioemu/vl.c index 224d1929c1..3ea7828412 100644 --- a/tools/ioemu/vl.c +++ b/tools/ioemu/vl.c @@ -22,6 +22,9 @@ * THE SOFTWARE. 
*/ #include "vl.h" +#ifdef __ia64__ +#include +#endif #include #include @@ -518,6 +521,11 @@ int64_t cpu_get_real_ticks(void) return val; } +#elif defined(__ia64__) +#include "ia64_intrinsic.h" +#define cpu_get_real_ticks() \ + __ia64_getreg(_IA64_REG_AR_ITC) + #else #error unsupported CPU #endif @@ -2375,6 +2383,7 @@ static uint8_t *signal_stack; #include +#if defined(__i386__) || defined (__x86_64__) #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER) #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER) @@ -2544,6 +2553,10 @@ void unset_vram_mapping(unsigned long addr, unsigned long end) /* FIXME Flush the shadow page */ unsetup_mapping(xc_handle, domid, toptab, addr, end); } +#elif defined(__ia64__) +void set_vram_mapping(unsigned long addr, unsigned long end) {} +void unset_vram_mapping(unsigned long addr, unsigned long end) {} +#endif int main(int argc, char **argv) { @@ -3018,9 +3031,14 @@ int main(int argc, char **argv) phys_ram_size = ram_size + vga_ram_size + bios_size; ram_pages = ram_size/PAGE_SIZE; +#if defined(__i386__) || defined(__x86_64__) vgaram_pages = (vga_ram_size -1)/PAGE_SIZE + 1; free_pages = vgaram_pages / L1_PAGETABLE_ENTRIES; extra_pages = vgaram_pages + free_pages; +#else + /* Test vga acceleration later */ + extra_pages = 0; +#endif xc_handle = xc_interface_open(); @@ -3049,6 +3067,7 @@ int main(int argc, char **argv) exit(-1); } +#if defined(__i386__) || defined(__x86_64__) if ( xc_get_pfn_list(xc_handle, domid, page_array, nr_pages) != nr_pages ) { perror("xc_get_pfn_list"); exit(-1); } @@ -3077,8 +3096,6 @@ exit(-1); } - - memset(shared_vram, 0, vgaram_pages * PAGE_SIZE); toptab = page_array[ram_pages] << PAGE_SHIFT; @@ -3087,7 +3104,31 @@ page_array[ram_pages]); freepage_array = &page_array[nr_pages - extra_pages]; - +#elif defined(__ia64__) + if ( xc_ia64_get_pfn_list(xc_handle, domid, page_array, 0, ram_pages) != ram_pages) + { + 
perror("xc_ia64_get_pfn_list"); + exit(-1); + } + + if ((phys_ram_base = xc_map_foreign_batch(xc_handle, domid, + PROT_READ|PROT_WRITE, + page_array, + ram_pages)) == 0) { + perror("xc_map_foreign_batch"); + exit(-1); + } + + if ( xc_ia64_get_pfn_list(xc_handle, domid, page_array, IO_PAGE_START>>PAGE_SHIFT, 1) != 1) + { + perror("xc_ia64_get_pfn_list"); + exit(-1); + } + + shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE, + PROT_READ|PROT_WRITE, + page_array[0]); +#endif fprintf(logfile, "shared page at pfn:%lx, mfn: %lx\n", (nr_pages-1), (page_array[nr_pages - 1])); -- 2.30.2